        if ( !cpu_has_apic )
            goto done;
-        pdev = pci_get_pdev_by_domain(d, msi->seg, msi->bus, msi->devfn);
+        pdev = pci_get_pdev(d, PCI_SBDF(msi->seg, msi->bus, msi->devfn));
        if ( !pdev )
            goto done;
    if ( vf >= 0 )
    {
-        struct pci_dev *pdev = pci_get_pdev(seg, bus, PCI_DEVFN(slot, func));
+        struct pci_dev *pdev = pci_get_pdev(NULL,
+                                            PCI_SBDF(seg, bus, slot, func));
        unsigned int pos = pci_find_ext_capability(seg, bus,
                                                   PCI_DEVFN(slot, func),
                                                   PCI_EXT_CAP_ID_SRIOV);
    struct msi_desc *old_desc;
    ASSERT(pcidevs_locked());
-    pdev = pci_get_pdev(msi->seg, msi->bus, msi->devfn);
+    pdev = pci_get_pdev(NULL, PCI_SBDF(msi->seg, msi->bus, msi->devfn));
    if ( !pdev )
        return -ENODEV;
    struct msi_desc *old_desc;
    ASSERT(pcidevs_locked());
-    pdev = pci_get_pdev(msi->seg, msi->bus, msi->devfn);
+    pdev = pci_get_pdev(NULL, PCI_SBDF(msi->seg, msi->bus, msi->devfn));
    if ( !pdev || !pdev->msix )
        return -ENODEV;
        return 0;
    pcidevs_lock();
-    pdev = pci_get_pdev(seg, bus, devfn);
+    pdev = pci_get_pdev(NULL, PCI_SBDF(seg, bus, devfn));
    if ( !pdev )
        rc = -ENODEV;
    else if ( pdev->msix->used_entries != !!off )
    pcidevs_lock();
-    pdev = pci_get_pdev(seg, PCI_BUS(bdf), PCI_DEVFN(bdf));
+    pdev = pci_get_pdev(NULL, PCI_SBDF(seg, bdf));
    if ( pdev )
        rc = pci_msi_conf_write_intercept(pdev, reg, size, data);
            break;
        pcidevs_lock();
-        pdev = pci_get_pdev(0, restore_msi.bus, restore_msi.devfn);
+        pdev = pci_get_pdev(NULL,
+                            PCI_SBDF(0, restore_msi.bus, restore_msi.devfn));
        ret = pdev ? pci_restore_msi_state(pdev) : -ENODEV;
        pcidevs_unlock();
        break;
            break;
        pcidevs_lock();
-        pdev = pci_get_pdev(dev.seg, dev.bus, dev.devfn);
+        pdev = pci_get_pdev(NULL, PCI_SBDF(dev.seg, dev.bus, dev.devfn));
        ret = pdev ? pci_restore_msi_state(pdev) : -ENODEV;
        pcidevs_unlock();
        break;
            }
            pcidevs_lock();
-            pdev = pci_get_pdev(dev.seg, dev.bus, dev.devfn);
+            pdev = pci_get_pdev(NULL, PCI_SBDF(dev.seg, dev.bus, dev.devfn));
            if ( !pdev )
                node = XEN_INVALID_DEV;
            else if ( pdev->node == NUMA_NO_NODE )
    }
    pcidevs_lock();
-    iommu->msi.dev = pci_get_pdev(iommu->seg, PCI_BUS(iommu->bdf),
-                                  PCI_DEVFN(iommu->bdf));
+    iommu->msi.dev = pci_get_pdev(NULL, PCI_SBDF(iommu->seg, iommu->bdf));
    pcidevs_unlock();
    if ( !iommu->msi.dev )
    {
            if ( !pci_init )
                continue;
            pcidevs_lock();
-            pdev = pci_get_pdev(seg, PCI_BUS(bdf), PCI_DEVFN(bdf));
+            pdev = pci_get_pdev(NULL, PCI_SBDF(seg, bdf));
            pcidevs_unlock();
        }
    const struct pci_dev *pdev;
    pcidevs_lock();
-    pdev = pci_get_pdev(seg, sbdf.bus, sbdf.devfn);
+    pdev = pci_get_pdev(NULL, sbdf);
    pcidevs_unlock();
    if ( pdev )
        return 0;
}
-struct pci_dev *pci_get_pdev(uint16_t seg, uint8_t bus, uint8_t devfn)
-{
-    const struct pci_seg *pseg = get_pseg(seg);
-    struct pci_dev *pdev;
-
-    ASSERT(pcidevs_locked());
-
-    if ( !pseg )
-        return NULL;
-
-    list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
-        if ( pdev->bus == bus && pdev->devfn == devfn )
-            return pdev;
-
-    return NULL;
-}
-
struct pci_dev *pci_get_real_pdev(int seg, int bus, int devfn)
{
    struct pci_dev *pdev;
    int stride;

    if ( seg < 0 || bus < 0 || devfn < 0 )
        return NULL;

-    for ( pdev = pci_get_pdev(seg, bus, devfn), stride = 4;
+    for ( pdev = pci_get_pdev(NULL, PCI_SBDF(seg, bus, devfn)), stride = 4;
          !pdev && stride; stride >>= 1 )
    {
        if ( !(devfn & (8 - stride)) )
            continue;
-        pdev = pci_get_pdev(seg, bus, devfn & ~(8 - stride));
+        pdev = pci_get_pdev(NULL, PCI_SBDF(seg, bus, devfn & ~(8 - stride)));
        if ( pdev && stride != pdev->phantom_stride )
            pdev = NULL;
    }

    return pdev;
}
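
For context (illustration only, not part of this patch): pci_get_real_pdev() resolves a possibly phantom function to its real device by clearing low function-number bits and accepting a candidate only when its phantom_stride matches the step that produced it. A hypothetical lookup, assuming a real device at 0000:00:07.0 that advertises phantom_stride == 2:

/*
 * Hypothetical example: function 07.2 has no pdev of its own.  The stride-4
 * step is skipped since (devfn & 4) == 0; the stride-2 step clears the low
 * function bits, finds 00:07.0, and accepts it because that device's
 * phantom_stride is 2.
 */
pdev = pci_get_real_pdev(0, 0, PCI_DEVFN(7, 2));
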
-struct pci_dev *pci_get_pdev_by_domain(const struct domain *d, uint16_t seg,
-                                       uint8_t bus, uint8_t devfn)
+struct pci_dev *pci_get_pdev(const struct domain *d, pci_sbdf_t sbdf)
{
    struct pci_dev *pdev;

+    ASSERT(d || pcidevs_locked());
+
    /*
     * The hardware domain owns the majority of the devices in the system.
     * When there are multiple segments, traversing the per-segment list is
     * likely going to be faster, whereas for a single segment the difference
     * shouldn't be that large.
     */
-    if ( is_hardware_domain(d) )
+    if ( !d || is_hardware_domain(d) )
    {
-        const struct pci_seg *pseg = get_pseg(seg);
+        const struct pci_seg *pseg = get_pseg(sbdf.seg);

        if ( !pseg )
            return NULL;

        list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
-            if ( pdev->bus == bus && pdev->devfn == devfn &&
-                 pdev->domain == d )
+            if ( pdev->sbdf.bdf == sbdf.bdf &&
+                 (!d || pdev->domain == d) )
                return pdev;
    }
    else
        list_for_each_entry ( pdev, &d->pdev_list, domain_list )
-            if ( pdev->bus == bus && pdev->devfn == devfn )
+            if ( pdev->sbdf.sbdf == sbdf.sbdf )
                return pdev;

    return NULL;
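
The new ASSERT() spells out the locking contract of the folded helper: a NULL domain means a global lookup on the per-segment list and must run with the pcidevs lock held, while passing a domain scopes the search to that domain's devices. A minimal caller sketch (illustration only; example_device_present() is a hypothetical helper, not part of this patch):

static bool example_device_present(const struct domain *d, pci_sbdf_t sbdf)
{
    bool found;

    /*
     * d == NULL: global lookup, which the new ASSERT() requires to happen
     * under the pcidevs lock; d != NULL: only devices owned by d match.
     */
    pcidevs_lock();
    found = pci_get_pdev(d, sbdf) != NULL;
    pcidevs_unlock();

    return found;
}
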
    else if ( info->is_virtfn )
    {
        pcidevs_lock();
-        pdev = pci_get_pdev(seg, info->physfn.bus, info->physfn.devfn);
+        pdev = pci_get_pdev(NULL,
+                            PCI_SBDF(seg, info->physfn.bus,
+                                     info->physfn.devfn));
        if ( pdev )
            pf_is_extfn = pdev->info.is_extfn;
        pcidevs_unlock();
        return -EINVAL;
    ASSERT(pcidevs_locked());
-    pdev = pci_get_pdev_by_domain(d, seg, bus, devfn);
+    pdev = pci_get_pdev(d, PCI_SBDF(seg, bus, devfn));
    if ( !pdev )
        return -ENODEV;
    {
        for ( devfn = 0; devfn < 256; devfn++ )
        {
-            struct pci_dev *pdev = pci_get_pdev(pseg->nr, bus, devfn);
+            struct pci_dev *pdev = pci_get_pdev(NULL,
+                                                PCI_SBDF(pseg->nr, bus, devfn));
            if ( !pdev )
                continue;
    int rc = 0;
    ASSERT(pcidevs_locked());
-    pdev = pci_get_pdev(seg, bus, devfn);
+    pdev = pci_get_pdev(NULL, PCI_SBDF(seg, bus, devfn));
    if ( !pdev )
        rc = -ENODEV;
    /* device_assigned() should already have cleared the device for assignment */
    ASSERT(pcidevs_locked());
-    pdev = pci_get_pdev(seg, bus, devfn);
+    pdev = pci_get_pdev(NULL, PCI_SBDF(seg, bus, devfn));
    ASSERT(pdev && (pdev->domain == hardware_domain ||
                    pdev->domain == dom_io));
    int rc;
    /* find ME VT-d engine base on a real ME device */
-    pdev = pci_get_pdev(0, 0, PCI_DEVFN(dev, 0));
+    pdev = pci_get_pdev(NULL, PCI_SBDF(0, 0, dev, 0));
    drhd = acpi_find_matched_drhd_unit(pdev);
    /* map or unmap ME phantom function */
    u8 b = bus, df = devfn, sb;
    pcidevs_lock();
-    pdev = pci_get_pdev(0, bus, devfn);
+    pdev = pci_get_pdev(NULL, PCI_SBDF(0, bus, devfn));
    pcidevs_unlock();
    if ( !pdev ||
    }
    /* Find the PCI dev matching the address. */
-    pdev = pci_get_pdev_by_domain(d, sbdf.seg, sbdf.bus, sbdf.devfn);
+    pdev = pci_get_pdev(d, sbdf);
    if ( !pdev )
        return vpci_read_hw(sbdf, reg, size);
     * Find the PCI dev matching the address.
     * Passthrough everything that's not trapped.
     */
-    pdev = pci_get_pdev_by_domain(d, sbdf.seg, sbdf.bus, sbdf.devfn);
+    pdev = pci_get_pdev(d, sbdf);
    if ( !pdev )
    {
        vpci_write_hw(sbdf, reg, size, data);
int pci_remove_device(u16 seg, u8 bus, u8 devfn);
int pci_ro_device(int seg, int bus, int devfn);
int pci_hide_device(unsigned int seg, unsigned int bus, unsigned int devfn);
-struct pci_dev *pci_get_pdev(uint16_t seg, uint8_t bus, uint8_t devfn);
+struct pci_dev *pci_get_pdev(const struct domain *d, pci_sbdf_t sbdf);
struct pci_dev *pci_get_real_pdev(int seg, int bus, int devfn);
-struct pci_dev *pci_get_pdev_by_domain(const struct domain *, uint16_t seg,
-                                       uint8_t bus, uint8_t devfn);
void pci_check_disable_device(u16 seg, u8 bus, u8 devfn);
uint8_t pci_conf_read8(pci_sbdf_t sbdf, unsigned int reg);
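
As the call sites above show, PCI_SBDF() is used with a segment plus either a 16-bit BDF, a bus/devfn pair, or a bus/slot/func triple. A small sketch (pci_sbdf_forms_agree() is a hypothetical helper, not part of this patch) showing that the three spellings name the same device, here the made-up address 0000:00:1f.3:

static bool pci_sbdf_forms_agree(void)
{
    pci_sbdf_t a = PCI_SBDF(0, 0x00fb);     /* segment + 16-bit BDF */
    pci_sbdf_t b = PCI_SBDF(0, 0, 0xfb);    /* segment + bus + devfn */
    pci_sbdf_t c = PCI_SBDF(0, 0, 0x1f, 3); /* segment + bus + slot + func */

    return a.sbdf == b.sbdf && b.sbdf == c.sbdf;
}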